[IA64] assign_new_domain_page now calls assign_domain_page
author awilliam@xenbuild.aw <awilliam@xenbuild.aw>
Fri, 24 Mar 2006 17:41:48 +0000 (10:41 -0700)
committer awilliam@xenbuild.aw <awilliam@xenbuild.aw>
Fri, 24 Mar 2006 17:41:48 +0000 (10:41 -0700)
assign_new_domain_page inlines assign_new_domain0_page and calls
assign_domain_page.
cleanup in asm-ia64/xenprocessor.h

Signed-off-by: Tristan Gingold <tristan.gingold@bull.net>
xen/arch/ia64/xen/domain.c
xen/include/asm-ia64/xenprocessor.h

index 372566f84b8b23f3f805a54e3e188f67a8ccb966..b1c811d40289737652c7144c0879585149ed6e0e 100644 (file)
@@ -488,80 +488,46 @@ void new_thread(struct vcpu *v,
        }
 }
 
-static struct page_info * assign_new_domain0_page(unsigned long mpaddr)
-{
-       if (mpaddr < dom0_start || mpaddr >= dom0_start + dom0_size) {
-               printk("assign_new_domain0_page: bad domain0 mpaddr 0x%lx!\n",mpaddr);
-               printk("assign_new_domain0_page: start=0x%lx,end=0x%lx!\n",
-                       dom0_start, dom0_start+dom0_size);
-               while(1);
-       }
-       return mfn_to_page((mpaddr >> PAGE_SHIFT));
-}
 
-/* allocate new page for domain and map it to the specified metaphysical addr */
+/* Allocate a new page for domain and map it to the specified metaphysical 
+   address.  */
 static struct page_info * assign_new_domain_page(struct domain *d, unsigned long mpaddr)
 {
-       struct mm_struct *mm = d->arch.mm;
-       struct page_info *pt, *p = (struct page_info *)0;
-       pgd_t *pgd;
-       pud_t *pud;
-       pmd_t *pmd;
-       pte_t *pte;
+       unsigned long maddr;
+       struct page_info *p;
 
-       if (!mm->pgd) {
-               printk("assign_new_domain_page: domain pgd must exist!\n");
-               return(p);
-       }
-       pgd = pgd_offset(mm,mpaddr);
-       if (pgd_none(*pgd))
-       {
-               pgd_populate(mm, pgd, pud_alloc_one(mm,mpaddr));
-               pt = maddr_to_page(pgd_val(*pgd));
-               list_add_tail(&pt->list, &d->arch.mm->pt_list);
+#ifdef CONFIG_DOMAIN0_CONTIGUOUS
+       if (d == dom0) {
+               if (mpaddr < dom0_start || mpaddr >= dom0_start + dom0_size) {
+                       /* FIXME: is it true ?
+                          dom0 memory is not contiguous!  */
+                       printk("assign_new_domain_page: bad domain0 "
+                              "mpaddr=%lx, start=%lx, end=%lx!\n",
+                              mpaddr, dom0_start, dom0_start+dom0_size);
+                       while(1);
+               }
+               p = mfn_to_page((mpaddr >> PAGE_SHIFT));
        }
-
-       pud = pud_offset(pgd, mpaddr);
-       if (pud_none(*pud))
+       else
+#endif
        {
-               pud_populate(mm, pud, pmd_alloc_one(mm,mpaddr));
-               pt = maddr_to_page(pud_val(*pud));
-               list_add_tail(&pt->list, &d->arch.mm->pt_list);
+               p = alloc_domheap_page(d);
+               // zero out pages for security reasons
+               if (p) memset(__va(page_to_maddr(p)),0,PAGE_SIZE);
        }
-
-       pmd = pmd_offset(pud, mpaddr);
-       if (pmd_none(*pmd))
-       {
-               pmd_populate_kernel(mm, pmd, pte_alloc_one_kernel(mm,mpaddr));
-//             pmd_populate(mm, pmd, pte_alloc_one(mm,mpaddr));
-               pt = maddr_to_page(pmd_val(*pmd));
-               list_add_tail(&pt->list, &d->arch.mm->pt_list);
+       if (unlikely(!p)) {
+               printf("assign_new_domain_page: Can't alloc!!!! Aaaargh!\n");
+               return(p);
        }
-
-       pte = pte_offset_map(pmd, mpaddr);
-       if (pte_none(*pte)) {
-#ifdef CONFIG_DOMAIN0_CONTIGUOUS
-               if (d == dom0) p = assign_new_domain0_page(mpaddr);
-               else
-#endif
-               {
-                       p = alloc_domheap_page(d);
-                       // zero out pages for security reasons
-                       if (p) memset(__va(page_to_maddr(p)),0,PAGE_SIZE);
-               }
-               if (unlikely(!p)) {
-                       printf("assign_new_domain_page: Can't alloc!!!! Aaaargh!\n");
-                       return(p);
-               }
-               if (unlikely(page_to_maddr(p) > __get_cpu_var(vhpt_paddr)
-                            && page_to_maddr(p) < __get_cpu_var(vhpt_pend))) {
-                       printf("assign_new_domain_page: reassigned vhpt page %lx!!\n",
-                               page_to_maddr(p));
-               }
-               set_pte(pte, pfn_pte(page_to_maddr(p) >> PAGE_SHIFT,
-                       __pgprot(__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RWX)));
+       maddr = page_to_maddr (p);
+       if (unlikely(maddr > __get_cpu_var(vhpt_paddr)
+                    && maddr < __get_cpu_var(vhpt_pend))) {
+               /* FIXME: how can this happen ?
+                  vhpt is allocated by alloc_domheap_page.  */
+               printf("assign_new_domain_page: reassigned vhpt page %lx!!\n",
+                      maddr);
        }
-       else printk("assign_new_domain_page: mpaddr %lx already mapped!\n",mpaddr);
+       assign_domain_page (d, mpaddr, maddr);
        return p;
 }
 
index 62c0459360112ee78a2f849b364b3806cd7cbbb1..a5d2e9011fad646e69d2b62b618b3f17f04e548d 100644 (file)
@@ -172,14 +172,6 @@ typedef union {
     };
 } tpr_t;
 
-#define IA64_ISR_CODE_MASK0     0xf
-#define IA64_UNIMPL_DADDR_FAULT     0x30
-#define IA64_UNIMPL_IADDR_TRAP      0x10
-#define IA64_RESERVED_REG_FAULT     0x30
-#define IA64_REG_NAT_CONSUMPTION_FAULT  0x10
-#define IA64_NAT_CONSUMPTION_FAULT  0x20
-#define IA64_PRIV_OP_FAULT      0x10
-
 /* indirect register type */
 enum {
     IA64_CPUID,     /*  cpuid */